---
# Heat environment file: Ceph storage configuration deployed via ceph-ansible.
# Defines RBD backends for Cinder/Nova/Glance, per-device CRUSH device classes,
# two CRUSH rules (HDD/SSD), and the pools pinned to each rule.
parameter_defaults:
  CephAnsiblePlaybookVerbosity: 3

  # Storage backends: RBD only — iSCSI and NFS backends disabled.
  CinderEnableIscsiBackend: false
  CinderEnableRbdBackend: true
  CinderEnableNfsBackend: false
  NovaEnableRbdBackend: true
  GlanceBackend: rbd

  # Pool names used by each service (must match entries in CephPools below).
  CinderRbdPoolName: "volumes"
  NovaRbdPoolName: "vms"
  GlanceRbdPoolName: "images"

  # Default pg_num for pools that don't override it; pools listed in
  # CephPools below explicitly set pg_num: 64 instead.
  CephPoolDefaultPgNum: 32

  # OSD layout: bluestore over LVM, one OSD per device, each tagged with a
  # CRUSH device class so the crush_rules below can target hdd vs ssd.
  CephAnsibleDisksConfig:
    osd_scenario: lvm
    osd_objectstore: bluestore
    lvm_volumes:
      - data: '/dev/vdb'
        crush_device_class: 'hdd'
      - data: '/dev/vdc'
        crush_device_class: 'ssd'
      - data: '/dev/vdd'
        crush_device_class: 'hdd'
      - data: '/dev/vde'
        crush_device_class: 'ssd'
      - data: '/dev/vdf'
        crush_device_class: 'hdd'
    journal_size: 512

  # CRUSH rules: HDD is the cluster default; SSD is opt-in per pool.
  CephAnsibleExtraConfig:
    crush_rule_config: true
    create_crush_tree: true
    crush_rules:
      - name: HDD
        root: default
        type: host
        class: 'hdd'
        default: true
      - name: SSD
        root: default
        type: host
        class: 'ssd'
        default: false

  # Extra Cinder RBD pool(s) beyond CinderRbdPoolName.
  CinderRbdExtraPools:
    - fast_volumes

  # Pool definitions; rule_name must reference a crush_rules entry above.
  # NOTE(review): vms and fast_volumes are placed on the SSD rule — confirm
  # this matches the intended performance tiering.
  CephPools:
    - name: images
      pg_num: 64
      rule_name: HDD
      application: rbd
    - name: volumes
      pg_num: 64
      rule_name: HDD
      application: rbd
    - name: vms
      pg_num: 64
      rule_name: SSD
      application: rbd
    - name: backups
      pg_num: 64
      rule_name: HDD
      application: rbd
    - name: fast_volumes
      pg_num: 64
      rule_name: SSD
      application: rbd